02 PyTorch basic Tensor operations


In [1]:
%reset -f
from __future__ import print_function
from __future__ import division
import math
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

import torch
import sys
print('__Python VERSION:', sys.version)
print('__pyTorch VERSION:', torch.__version__)
print('__CUDA VERSION')
from subprocess import call
# call(["nvcc", "--version"]) does not work
! nvcc --version
print('__CUDNN VERSION:', torch.backends.cudnn.version())
print('__Number CUDA Devices:', torch.cuda.device_count())
print('__Devices')
# call(["nvidia-smi", "--format=csv", "--query-gpu=index,name,driver_version,memory.total,memory.used,memory.free"])
print('Active CUDA Device: GPU', torch.cuda.current_device())

print ('Available devices ', torch.cuda.device_count())
print ('Current cuda device ', torch.cuda.current_device())


__Python VERSION: 2.7.12 (default, Nov 19 2016, 06:48:10) 
[GCC 5.4.0 20160609]
__pyTorch VERSION: 0.2.0_2
__CUDA VERSION
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2016 NVIDIA Corporation
Built on Tue_Jan_10_13:22:03_CST_2017
Cuda compilation tools, release 8.0, V8.0.61
__CUDNN VERSION: 6021
__Number CUDA Devices: 1
__Devices
Active CUDA Device: GPU 0
Available devices  1
Current cuda device  0
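
The queries above enumerate the visible GPUs. A minimal sketch of pinning work to a specific device; the index 0 is an assumption and should match one of the indices printed above:

In [ ]:
# Make GPU 0 the default CUDA device (hypothetical index: pick one reported
# by torch.cuda.device_count() above). Guarded so CPU-only machines skip it.
if torch.cuda.is_available():
    torch.cuda.set_device(0)            # set the default device globally
    with torch.cuda.device(0):          # or scope the choice with a context manager
        y = torch.zeros(2, 2).cuda()    # lands on GPU 0
    print(type(y))                      # <class 'torch.cuda.FloatTensor'>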

Numpy vs PyTorch Syntax

A quick cheat sheet mapping common NumPy idioms to their PyTorch equivalents (a few of these are verified numerically in the sketch below):

  NumPy                    PyTorch

  • np.zeros((2, 3))       torch.zeros(2, 3)
  • np.random.rand(2, 3)   torch.rand(2, 3)
  • x.reshape(1, -1)       x.view(1, -1)
  • x.shape                x.size()
  • x.dot(w)               x.mm(w)
  • x.matmul(w)            x.bmm(w)
  • x.T                    x.t()
  • x.transpose(0, 2, 1)   x.permute(0, 2, 1)
  • x.argmax(axis=1)       _, i = x.max(dim=1)
  • np.sum(x, axis=1)      torch.sum(x, dim=1)
  • np.maximum(x, 0)       torch.clamp(x, min=0)
  • x.copy()               x.clone()
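
A quick numerical check of a few rows from the table; a sketch only, using the same PyTorch 0.2-era API as the rest of this notebook:

In [ ]:
a = np.random.rand(2, 3).astype(np.float32)
t = torch.from_numpy(a)                 # wrap the same buffer as a tensor

print(a.shape, t.size())                # (2, 3) vs torch.Size([2, 3])
print(np.allclose(a.reshape(1, -1), t.view(1, -1).numpy()))
print(np.allclose(np.maximum(a, 0), torch.clamp(t, min=0).numpy()))
print(np.allclose(a.sum(axis=1), torch.sum(t, dim=1).numpy()))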

Torch Tensors


In [3]:
from __future__ import print_function
import torch
from torch.autograd import Variable

In [4]:
x = torch.Tensor(3, 2)   # uninitialized storage: the values printed are garbage
print (type(x))
print (x)

# how Variables work: wrapping a tensor adds autograd bookkeeping
x = Variable(x)
print ("x:" + str(x))
print ("requires grad:" + str(x.requires_grad))
print ("data:" + str(x.data))


<class 'torch.FloatTensor'>

-1.7305e+02  4.5402e-43
-8.5831e-01  4.5402e-43
-2.7299e+12  4.5402e-43
[torch.FloatTensor of size 3x2]

x:Variable containing:
-1.7305e+02  4.5402e-43
-8.5831e-01  4.5402e-43
-2.7299e+12  4.5402e-43
[torch.FloatTensor of size 3x2]

requires grad:False
data:
-1.7305e+02  4.5402e-43
-8.5831e-01  4.5402e-43
-2.7299e+12  4.5402e-43
[torch.FloatTensor of size 3x2]
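
With requires_grad=True the Variable also accumulates gradients; a minimal sketch of the old (0.2-era) autograd round trip:

In [ ]:
v = Variable(torch.ones(2, 2), requires_grad=True)
out = (v * 3).sum()     # a one-element Variable
out.backward()          # fills in v.grad
print(v.grad)           # d(out)/dv is 3 everywhere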


In [5]:
x = torch.rand(3, 4)   # uniform random samples in [0, 1)
print (type(x))
print (x)


<class 'torch.FloatTensor'>

 0.8346  0.2928  0.3545  0.3185
 0.8032  0.3171  0.5971  0.9996
 0.5166  0.2219  0.3525  0.7987
[torch.FloatTensor of size 3x4]


In [6]:
print (x[1:])   # all rows from index 1 on, numpy-style slicing


 0.8032  0.3171  0.5971  0.9996
 0.5166  0.2219  0.3525  0.7987
[torch.FloatTensor of size 2x4]
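
Indexing follows numpy conventions more generally; a small sketch on the same x:

In [ ]:
print(x[:, 1])      # second column, a FloatTensor of size 3
print(x[0, :2])     # first two entries of row 0
print(x[2, 3])      # fully indexing returns a plain Python float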


In [7]:
x.numpy()


Out[7]:
array([[ 0.83460784,  0.29282814,  0.35453677,  0.31852531],
       [ 0.80323261,  0.3171213 ,  0.59707141,  0.99962288],
       [ 0.51662469,  0.22191475,  0.35245907,  0.79872996]], dtype=float32)
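
Note that .numpy() and torch.from_numpy() share memory rather than copy, so writes on one side are visible on the other. A sketch on a throwaway array, so the x above stays untouched:

In [ ]:
a = np.zeros((2, 2), dtype=np.float32)
t = torch.from_numpy(a)    # t aliases a's buffer, no copy
t[0, 0] = 7                # write through the tensor...
print(a[0, 0])             # ...and the numpy array sees 7.0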

In [8]:
if torch.cuda.is_available():
    x = x.cuda() * 2   # move x to the GPU, then double it there

print (type(x))
print (x)


<class 'torch.cuda.FloatTensor'>

 1.6692  0.5857  0.7091  0.6371
 1.6065  0.6342  1.1941  1.9992
 1.0332  0.4438  0.7049  1.5975
[torch.cuda.FloatTensor of size 3x4 (GPU 0)]
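
A CUDA tensor must be copied back to the host before converting to numpy; a sketch, guarded so it also runs on CPU-only machines:

In [ ]:
if torch.cuda.is_available():
    x_cpu = x.cpu()          # copy device memory back to host
    print(x_cpu.numpy())     # .numpy() only works on CPU tensors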

